@InProceedings{Jung:2018:ReGeDe,
author = "zeni, luis felipe de araujo and Jung, Claudio Rosito",
affiliation = "{universidade federal do rio grande do sul} and {universidade
federal do rio grande do sul}",
title = "Real-Time Gender Detection in the Wild Using Deep Neural
Networks",
booktitle = "Proceedings...",
year = "2018",
editor = "Ross, Arun and Gastal, Eduardo S. L. and Jorge, Joaquim A. and
Queiroz, Ricardo L. de and Minetto, Rodrigo and Sarkar, Sudeep and
Papa, Jo{\~a}o Paulo and Oliveira, Manuel M. and Arbel{\'a}ez,
Pablo and Mery, Domingo and Oliveira, Maria Cristina Ferreira de
and Spina, Thiago Vallin and Mendes, Caroline Mazetto and Costa,
Henrique S{\'e}rgio Gutierrez and Mejail, Marta Estela and Geus,
Klaus de and Scheer, Sergio",
organization = "Conference on Graphics, Patterns and Images, 31. (SIBGRAPI)",
publisher = "IEEE Computer Society",
address = "Los Alamitos",
keywords = "deep learning, computer vision, gender detection, real-time.",
abstract = "Gender recognition can be used in many applications, such as video
surveillance, human-computer interaction and customized
advertisement. Current state-of-the-art gender recognition methods
are detector-dependent or region-dependent, focusing mostly on
facial features (a face detector is typically required). These
limitations do not allow an end-to-end training pipeline, and many
features used in the detection phase must be re-learned in the
classification step. Furthermore, the use of facial features
limits the application of such methods in the wild, where the face
might not be present. This paper presents a real-time end-to-end
gender detector based on deep neural networks. The proposed method
detects and recognizes the gender of persons in the wild, meaning
                   in images with high variability in pose, illumination, and
                   occlusion. To train the network and evaluate the results, new
                   annotation sets of Pascal VOC 2007 and CelebA were created. Our experimental
results indicate that combining both datasets during training can
                   increase the mAP of our gender detector. We also visually analyze
                   which parts lead our network to make mistakes and the bias
introduced by the training data.",
conference-location = "Foz do Igua{\c{c}}u, PR, Brazil",
conference-year = "29 Oct.-1 Nov. 2018",
doi = "10.1109/SIBGRAPI.2018.00022",
url = "http://dx.doi.org/10.1109/SIBGRAPI.2018.00022",
language = "en",
ibi = "8JMKD3MGPAW/3RN5HUS",
url = "http://urlib.net/ibi/8JMKD3MGPAW/3RN5HUS",
targetfile = "87.pdf",
urlaccessdate = "2024, Apr. 29"
}